#example grids from package
# bell2010
# mackay1992
# fbb2003
## IMPORT CLEAN PARTICIPANT-LEVEL GRIDS
## load grid (continuous constructs only) for each participant
## #importExcel from OpenRepGrid package, creates S4 object
gridType <- "short" # use "simple" or "short" where short has even shorter construct names

# Import one participant's cleaned grid workbook.
# pid: zero-padded participant id, e.g. "P04".
# Replaces 12 copy-pasted path constructions with a single helper (DRY).
import_grid <- function(pid) {
  importExcel(file = paste0("data/participant_grids/", gridType, "/", pid, "_clean.xlsx"))
}

# researchers
p4  <- import_grid("P04")
p5  <- import_grid("P05")
p6  <- import_grid("P06")
p7  <- import_grid("P07")
p15 <- import_grid("P15")
# designers
p8  <- import_grid("P08")
p9  <- import_grid("P09")
p10 <- import_grid("P10")
p11 <- import_grid("P11")
p12 <- import_grid("P12")
p13 <- import_grid("P13")
p14 <- import_grid("P14")
## IMPORT RAW CODED-DATA DATAFRAME
## row = one participant construct (elements as cols)
df_raw <- read_csv(file = "data/CODED_CONSTRUCTS.csv", na=c("", "NA"))
# Display labels for the 12 participants; order must match list_all below
# (5 researchers first, then 7 designers).
# NOTE(review): `names` shadows base::names() for the rest of the script —
# consider renaming (e.g. participant_names).
names <- c("RESEARCHER P4","RESEARCHER P5","RESEARCHER P6","RESEARCHER P7","RESEARCHER P15","DESIGNER P8","DESIGNER P9","DESIGNER P10","DESIGNER P11","DESIGNER P12","DESIGNER P13","DESIGNER P14")
# The 10 stimulus elements; used later as factor levels for ELEMENT.
# NOTE(review): grid printouts later in this file show "TAXMAN_INFO" while this
# vector has "MAN_INFO" — confirm it matches the CSV column name exactly.
stimuli <- c("CAREBEAR_BARS","LADY_LINE","BULLET_BARS","CARTOMAP","MAN_INFO","PENGUIN_DISTS","HISTO_DIST", "IXN_EBARS","IXN_SLOPE","BAYES_RIDGES")
## CREATE GROUP-LEVEL GRIDS
# `+` on repgrid objects presumably concatenates grids over the shared
# elements — TODO confirm against OpenRepGrid docs.
g_researchers <- p4 + p5 + p6 + p7 + p15
g_designers <- p8 + p9 + p10 + p11 + p12 + p13 + p14
## CREATE MASTER GRID
g_all <- p4 + p5 + p6 + p7 + p15 + p8 + p9 + p10 + p11 + p12 + p13 + p14
## CREATE LIST OF GRIDS
# Order must stay in sync with the `names` label vector above.
list_all <- list(p4 , p5 , p6 , p7 , p15 , p8 , p9 , p10 , p11 , p12 , p13 , p14)
## MINIMAL CODED-DATA FRAME ONLY CONTINUOUS CONSTRUCTS
# Keep only the continuous ("NUMERIC") constructs, convert coding columns to
# factors, coerce the element rating columns to numeric, and drop the raw
# coding bookkeeping columns.
df_coded <- df_raw %>%
  filter(CONSTRUCT_TYPE == "NUMERIC") %>%
  mutate(
    PID = as.factor(PID),
    SAMPLE = factor(SAMPLE),
    CONSTRUCT_TYPE = factor(CONSTRUCT_TYPE),
    POLE_LEFT = factor(POLE_LEFT),
    POLE_RIGHT = factor(POLE_RIGHT),
    # combined construct label, e.g. "leftpole-rightpole"
    POLES = paste0(POLE_LEFT, "-", POLE_RIGHT),
    FIRST = factor(FIRST),
    SECOND = factor(SECOND),
    THIRD = factor(THIRD),
    CODE_FULL = factor(CODE_STANDARD),
    # short code label, e.g. "FIRST(SECOND)"
    CODE = factor(paste0(FIRST, "(", SECOND, ")")),
    # bug fix: was `RELFEXIVE = as.logical(REFLEXIVE)`, which silently created
    # a misspelled duplicate column; now coerces REFLEXIVE in place.
    REFLEXIVE = as.logical(REFLEXIVE),
    MATCH = as.logical(MATCH),
    # element rating columns may arrive as text; make them numeric
    # (folded in from a redundant second mutate() call)
    across(CAREBEAR_BARS:BAYES_RIDGES, as.numeric)
  ) %>%
  select(
    # drop intermediate coding columns; CODE_FULL preserves CODE_STANDARD
    -(CODE_DH:CODE_STANDARD)
  )
## LONG DATAFRAME
## row = one participant construct X element
# Pivot the element rating columns into (ELEMENT, value) pairs and type them.
df_codedElements <- df_coded |>
  pivot_longer(
    cols = CAREBEAR_BARS:BAYES_RIDGES,
    names_to = "ELEMENT",
    values_to = "value" # explicit, matches pivot_longer's default
  ) |>
  mutate(
    value = as.numeric(value),
    POLES = factor(POLES),
    # fix element display order to the stimuli vector
    ELEMENT = factor(ELEMENT, levels = stimuli)
  )
## CREATE DF FOR EFA
## need constructs as columns
# Widen so each coded construct (CODE) becomes a column of ratings.
# NOTE(review): if CODE is not unique per participant x element,
# pivot_wider() will produce list-columns and warn — verify upstream coding.
df_eda <- df_codedElements |>
  pivot_wider(
    names_from=CODE,
    values_from=value
  )
# Persist the wide numeric-construct table for downstream factor analysis.
write_csv(df_eda, file = "data/NUMERIC_CONSTRUCTS_WIDE.csv")
## LOOP all participants and render correlation heatmap on constructs
type <- "correlation_heatmaps/"
topic <- "construct_"
level <- "participant/"
# seq_along() replaces the manual `i <- 1 ... i <- i + 1` counter;
# `cor_mat` replaces `c`, which shadowed base::c().
for (i in seq_along(list_all)) {
  title <- names[i]
  # Pearson correlations between constructs (labels trimmed to 50 chars)
  cor_mat <- constructCor(list_all[[i]], trim = 50, index = FALSE, method = "pearson")
  # render heatmap: lower triangle, labelled cells, no hclust reordering
  g <- ggcorrplot(cor_mat, show.diag = TRUE, show.legend = FALSE, type = "lower",
                  hc.order = FALSE,
                  lab = TRUE, lab_size = 2.5,
                  tl.cex = 10, tl.srt = 15) +
    labs(title = paste0(title, " Construct Correlations"))
  print(g)
  # save plot
  ggsave(g, filename = paste0("plots/", level, type, topic, title, ".png"))
}
# cleanup loop temporaries
rm(g, cor_mat)
## LOOP all participants and render correlation heatmap on elements
type <- "correlation_heatmaps/"
topic <- "element_"
level <- "participant/"
# seq_along() replaces the manual counter; `cor_mat` replaces `c`,
# which shadowed base::c().
for (i in seq_along(list_all)) {
  title <- names[i]
  # Pearson correlations between elements (labels trimmed to 50 chars)
  cor_mat <- elementCor(list_all[[i]], trim = 50, index = FALSE, method = "pearson")
  # render heatmap: lower triangle, labelled cells, no hclust reordering
  g <- ggcorrplot(cor_mat, show.diag = TRUE, show.legend = FALSE, type = "lower",
                  hc.order = FALSE,
                  lab = TRUE, lab_size = 2.5,
                  tl.cex = 10, tl.srt = 25) +
    labs(title = paste0(title, " Element Correlations"))
  print(g)
  # save plot
  ggsave(g, filename = paste0("plots/", level, type, topic, title, ".png"))
}
# cleanup loop temporaries
rm(g, cor_mat)
# PRINT CONSTRUCT CLUSTER DENDROGRAM FOR EACH PARTICIPANT
# https://docs.openrepgrid.org/articles/web/clustering.html
type <- "cluster_dendrograms/"
topic <- "construct_"
level <- "participant/"
for (i in seq_along(list_all)) {
  title <- names[i]
  # Ward clustering of constructs on euclidean distances. Base graphics cannot
  # be replayed across devices, so the same call is made once per device; the
  # local helper removes the previous copy-pasted duplication.
  draw_dendrogram <- function() {
    cluster(list_all[[i]], along = 1, # 1 = constructs, 2 = elements, 0 = both (default)
            dmethod = "euclidean",
            cmethod = "ward.D",
            align = TRUE,
            cex = 0,       # overall plot text size
            lab.cex = 0.8, # label size
            main = title,
            mar = c(2, 1, 2, 25), # wide right margin for long construct labels
            trim = NA)
  }
  # print dendrogram to file
  png(filename = paste0("plots/", level, type, topic, title, ".png"),
      width = 10, height = 5, units = "in", res = 300)
  draw_dendrogram()
  dev.off() # flush base plot to the png file
  # print dendrogram to screen
  draw_dendrogram()
}
# PRINT ELEMENT CLUSTER DENDROGRAM FOR EACH PARTICIPANT
# https://docs.openrepgrid.org/articles/web/clustering.html
type <- "cluster_dendrograms/"
topic <- "element_"
level <- "participant/"
for (i in seq_along(list_all)) {
  title <- names[i]
  # Ward clustering of elements on euclidean distances. Same call per device
  # (file, then screen) via a local helper instead of copy-pasted duplication.
  draw_dendrogram <- function() {
    cluster(list_all[[i]], along = 2, # 1 = constructs, 2 = elements, 0 = both (default)
            dmethod = "euclidean",
            cmethod = "ward.D",
            align = TRUE,
            cex = 0,       # overall plot text size
            lab.cex = 0.8, # label size
            main = title,
            mar = c(2, 1, 2, 25), # wide right margin for long labels
            trim = NA)
  }
  # print dendrogram to file
  png(filename = paste0("plots/", level, type, topic, title, ".png"),
      width = 10, height = 4, units = "in", res = 300)
  draw_dendrogram()
  dev.off() # flush base plot to the png file
  # print dendrogram to screen
  draw_dendrogram()
}
## PRINT SIMPLE BERTIN FOR EACH PARTICIPANT (saved to file only)
type <- "bertin_simpleplots/"
topic <- "simpleBertin_"
level <- "participant/"
# seq_along() replaces the manual `i = 1 ... i = i + 1` counter.
for (i in seq_along(list_all)) {
  title <- names[i]
  png(filename = paste0("plots/", level, type, topic, title, ".png"),
      width = 12, height = 4, units = "in", res = 300)
  # https://docs.openrepgrid.org/articles/web/visualization-bertin.html
  bertin(
    list_all[[i]],
    along = 0, # cluster both constructs and elements
    dmethod = "euclidean",
    cmethod = "ward.D",
    align = TRUE,
    trim = 100, # truncate labels to 100 chars
    main = title, # redundant paste0() dropped
    colors = c("white", "black"),
    lheight = 0.8,
    id = c(TRUE, FALSE)
  )
  # Overlay the participant label as a title strip across the top
  par(fig = c(0, 1, 0.92, 1), mai = c(0, 0, 0, 0), new = TRUE)
  plot.new()
  title(main = title, adj = 0.05, line = -1, cex.main = 0.85)
  dev.off() # flush base plot to the png file
}
print("ONLY SAVED TO FILE")
## [1] "ONLY SAVED TO FILE"
# Render a clustered Bertin display (matrix plus dendrograms) for each
# participant to the interactive device only — nothing is saved here.
# The par()/plot.new() sequencing below is order-dependent; do not reorder.
i <- 1
for (l in list_all) {
title_text <- names[i]
# Create square plotting device (if saving or in interactive session)
#par(fig = c(0.1,1,0.1,1), mai = c(0, 0, 0.9, 0.5), new = FALSE) # more top and right margin
# Reset figure region; wide left/top margins leave room for long labels
par(fig = c(0, 1, 0, 1), mai = c(1.2, 4.3, 1.5, 2.5), new = FALSE)
plot.new()
#asp <- 1 # aspect ratio
# Adjust layout segments: make bertin display area square inside full plot
bertinCluster(
l,
along = 0, # cluster both constructs and elements
dmethod = "euclidean",
cmethod = "ward.D",
align = TRUE,
type = "rectangle", # dendrogram branch style
cex = 1,
lab.cex = 1,
trim = 50, # truncate labels to 50 chars
draw.axis = FALSE,
main = NULL, # suppress internal title
colors = c("white", "darkred"),
lheight=0.75,
id = c(TRUE, FALSE),
#xsegs = c(0.0, 0.15, 0.8, 0.91, 1), # wider center region
#ysegs = c(0.0, 0.15, 0.8, 1.0) # to match horizontal size
xsegs = c(0.0, 0.17, 0.77, 0.91, 1), # allow a little wider middle block
ysegs = c(0.0, 0.14, 0.8, 1.0) # more space to the matrix vertically
)
# Overlay title in top-left
#par(fig = c(0, 1, 0.95, 1), mai = c(0, 0, 0, 0), new = TRUE)
par(fig = c(0, 1, 0.92, 1), mai = c(0, 0, 0, 0), new = TRUE)
plot.new()
title(main = title_text, adj = 0.2, line=-1, cex.main = 0.85)
i <- i + 1
}
## TODO WRITE SIMPLE BERTIN LOOP
# For each participant: draw element, construct, and combined dendrograms to
# the interactive device, then a Bertin display of the combined clustering.
i=1
for (l in list_all){
par(fig = c(0,1,0,1), mai = c(0.2, 0.1, 0.1, 0.2), new = FALSE) # more top and right margin
plot.new()
title = names[i]
# NOTE(review): clustered_elements / clustered_constructs are never read
# afterwards; the assignments are retained because cluster() draws a
# dendrogram as a side effect when called.
clustered_elements <- cluster(l,
along = 2, #1=constructs, 2 = elements, 0 = both (default)
dmethod = "euclidean",#distance measure TODO evaluate diff options
cmethod="ward.D", #agglomeration method TODO evaluate diff option
cex = 1, lab.cex = 1, main = paste0(title, " - ", "Elements")
)
clustered_constructs <- cluster(l,
along = 1, #1=constructs, 2 = elements, 0 = both (default)
dmethod = "euclidean",#distance measure TODO evaluate diff options
cmethod="ward.D", #agglomeration method TODO evaluate diff option
cex = 1, lab.cex = 1, main = paste0(title, " - ", "Constructs")
)
clustered <- cluster(l,
along = 0, #1=constructs, 2 = elements, 0 = both (default)
dmethod = "euclidean",#distance measure TODO evaluate diff options
cmethod="ward.D", #agglomeration method TODO evaluate diff option
cex = 1, lab.cex = 1, main = paste0(title, " - ", "Both")
)
# https://docs.openrepgrid.org/articles/web/visualization-bertin.html
#bertin(clustered,
#trim=50, draw.axis = TRUE,
#colors = c("white", "darkred"))
#op <- par(fig = c(0,1,0.1,1), mai=c(0,0,0.2,0), cex.main=0.85, adj=0, new = TRUE)
#title(title)
# Create square plotting device (if saving or in interactive session)
#par(fig = c(0.1,1,0.1,1), mai = c(0, 0, 0.2, 0), new = TRUE) # more top and right margin
#par(fig = c(0, 1, 0, 1), mai = c(1.2, 4.2, 1.5, 2.5), new = FALSE)
#plot.new()
#asp <- 1 # aspect ratio
# Adjust layout segments: make bertin display area square inside full plot
# NOTE(review): `clustered` is already clustered, yet bertin() is also given
# along/dmethod/cmethod — confirm whether bertin honors or ignores these.
bertin(
clustered,
along = 0,
dmethod = "euclidean",
cmethod = "ward.D",
align = TRUE,
trim = 50,
main = paste0(title), # suppress internal title
colors = c("white", "darkred"),
lheight=0.8,
id = c(TRUE, FALSE)
)
# Overlay title in top-left
#par(fig = c(0, 1, 0.95, 1), mai = c(0, 0, 0, 0), new = TRUE)
par(fig = c(0, 1, 0.92, 1), mai = c(0, 0, 0, 0), new = TRUE)
plot.new()
title(main = title, adj = 0.05, line=-1, cex.main = 0.85)
i<-i+1
}
## PRINT PCA BIPLOT for each participant
type <- "pca_biplots/"
topic <- "biplot2d_"
level <- "participant/"
for (i in seq_along(list_all)) {
  title <- names[i]
  # Draw one 2D biplot (dimensions 2 vs 1) plus a small title overlay. Base
  # graphics must be redrawn per device, so the helper is called once for the
  # png file and once for the screen (removes the previous duplicated call).
  draw_biplot <- function() {
    # https://docs.openrepgrid.org/articles/web/visualization-biplot.html
    biplot2d(list_all[[i]],
             dim = c(2, 1),
             zoom = 1,
             ## constructs
             c.lines = TRUE,
             col.c.lines = gray(0.9),
             c.label.cex = 0.5,
             c.labels.inside = FALSE,
             c.label.col = "blue",
             ## elements
             e.point.col = "red",
             e.label.col = "red",
             e.label.cex = 0.5, # element label size
             ## size and margins
             mai = c(0.2, 1.5, 0.2, 1.5),
             unity = TRUE, # just makes it neater
             scale.e = 0.75) # stray trailing comma removed (empty argument)
    # overlay the participant label (unused `op <-` capture dropped)
    par(cex.main = 0.75, new = TRUE)
    title(title)
  }
  ## print biplot to file
  png(filename = paste0("plots/", level, type, topic, title, ".png"),
      width = 8, height = 8, units = "in", res = 300)
  draw_biplot()
  dev.off()
  ## print PCA loadings to console (only |loading| >= 0.3 shown)
  print(title)
  r <- constructPca(list_all[[i]])
  print(r, cut = 0.3)
  ## print biplot to screen
  draw_biplot()
}
## [1] "RESEARCHER P4"
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1 RC2 RC3
## not biased - biased 0.88 -0.33 0.31
## dry - sensational 0.91 0.31
## don't trust data - trust data 0.95
## don't believe author's message - believe author's message -0.89
## low audience edu level - high edu level -0.88 0.38
## stop - scroll 0.96
##
## RC1 RC2 RC3
## SS loadings 3.19 1.25 1.17
## Proportion Var 0.53 0.21 0.19
## Cumulative Var 0.53 0.74 0.93
## [1] "RESEARCHER P5"
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1 RC2 RC3
## not pro designers - pro designers 0.98
## no skin in the game - skin in the game 0.44 0.69 0.49
## informative - persuasive -0.89 0.33
## don't trust data - trust data 0.89
## don't trust author - trust author 0.94
## low author competancy - high author competancy 0.95
## general audience - niche audience 0.97
## less educated audience - more educated audience 0.95
## ignore - stop and look 0.93 0.32
##
## RC1 RC2 RC3
## SS loadings 4.53 2.44 1.50
## Proportion Var 0.50 0.27 0.17
## Cumulative Var 0.50 0.77 0.94
## [1] "RESEARCHER P6"
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1
## viewer no data viz training - viewer data viz training 0.93
## target audience ignore - target audience look
## participant ignore - participant look 0.91
## author less design skills - author more design skills
## author less stats skills - author more stats skills 0.99
## author ineffective conveying message - author effective conveying message
## RC2
## viewer no data viz training - viewer data viz training
## target audience ignore - target audience look 0.96
## participant ignore - participant look
## author less design skills - author more design skills
## author less stats skills - author more stats skills
## author ineffective conveying message - author effective conveying message 0.88
## RC3
## viewer no data viz training - viewer data viz training
## target audience ignore - target audience look
## participant ignore - participant look
## author less design skills - author more design skills 0.97
## author less stats skills - author more stats skills
## author ineffective conveying message - author effective conveying message 0.30
##
## RC1 RC2 RC3
## SS loadings 2.71 1.81 1.06
## Proportion Var 0.45 0.30 0.18
## Cumulative Var 0.45 0.75 0.93
## [1] "RESEARCHER P7"
## Warning in cor.smooth(r): Matrix was not positive definite, smoothing was done
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1 RC2 RC3
## informal - formal -0.99
## does not care about design - care about design 0.62 0.59 0.48
## does not care about data - care about data -0.98
## author low des skills - author high des skills 0.84 0.48
## author did not try - author tried 0.79 0.53
## would not change based on context - would change -0.34 -0.92
## topic not interesting - topic interesting -0.86 0.30
## vis des not interesting - vis des interesting 0.74 0.36 0.53
## time look (in sec) - time look (in sec) 0.89
##
## RC1 RC2 RC3
## SS loadings 3.93 2.87 1.70
## Proportion Var 0.44 0.32 0.19
## Cumulative Var 0.44 0.76 0.94
## [1] "RESEARCHER P15"
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1 RC2 RC3
## context-dependent - stand-alone 0.66 -0.40 0.33
## quick to make - long time to make 0.93
## sole author - team author 0.90
## author no stats background - stats phd 0.92
## story-first - data-first 0.90
## hate (rip up) - love (publish) 0.93 0.30
## not clever - clever 0.86
##
## RC1 RC2 RC3
## SS loadings 2.25 1.97 1.94
## Proportion Var 0.32 0.28 0.28
## Cumulative Var 0.32 0.60 0.88
## [1] "DESIGNER P8"
## Warning in cor.smooth(r): Matrix was not positive definite, smoothing was done
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1 RC2 RC3
## hard to understand - easy to understand 0.31 0.83
## very ugly - very nice 0.90
## author low data-analysis skills - high data-analysis skills 0.70 0.61
## author low data-viz skills - high data-viz skills 0.94
## author low comm skills - high comm skills -0.86
## scroll - look -0.99
## low visual accessibility - high visual accessibility 0.88
## Niche audience - General audience -0.98
## Audience low data literacy - Audience high data literacy 0.90 -0.34
## informative - call-to-action 0.32
## Data collector != designer - Data collector = designer 0.85
## bad data rep - good data rep 0.88
## not trustworthy - trustworthy 0.64 0.72
##
## RC1 RC2 RC3
## SS loadings 5.40 3.50 1.78
## Proportion Var 0.42 0.27 0.14
## Cumulative Var 0.42 0.68 0.82
## [1] "DESIGNER P9"
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1 RC2 RC3
## would not look at - would look at 0.67 0.70
## niche audience - general audience 0.85 0.42
## designer less design skills - designer more design skills 0.94
## would not read contextual info - would read contextual info 0.97
##
## RC1 RC2 RC3
## SS loadings 2.09 1.10 0.74
## Proportion Var 0.52 0.28 0.19
## Cumulative Var 0.52 0.80 0.99
## [1] "DESIGNER P10"
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1
## easy to understand - hard to understand -0.72
## scroll past - stop and look 0.72
## less likely to save - more likely to save 0.89
## less likely to read contextual info - more likely to read
## less likely to "like" - more likely to "like" 0.75
## author less data comm (des) skills - author more data comm (des) skills 0.90
## author less data analysis skills - author more data analysis skills
## RC2
## easy to understand - hard to understand 0.40
## scroll past - stop and look -0.46
## less likely to save - more likely to save
## less likely to read contextual info - more likely to read
## less likely to "like" - more likely to "like" -0.36
## author less data comm (des) skills - author more data comm (des) skills
## author less data analysis skills - author more data analysis skills 0.95
## RC3
## easy to understand - hard to understand
## scroll past - stop and look 0.35
## less likely to save - more likely to save
## less likely to read contextual info - more likely to read 0.95
## less likely to "like" - more likely to "like" 0.35
## author less data comm (des) skills - author more data comm (des) skills
## author less data analysis skills - author more data analysis skills
##
## RC1 RC2 RC3
## SS loadings 3.26 1.55 1.23
## Proportion Var 0.47 0.22 0.18
## Cumulative Var 0.47 0.69 0.86
## [1] "DESIGNER P11"
## Warning in cor.smooth(r): Matrix was not positive definite, smoothing was done
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1 RC2
## Pub source (Reddit) (3 = new york post)\r\n - pub source (Jstor) -0.88 -0.33
## not legible (no idea what it's writing) - legible (can read) -0.85
## not readable (no idea about message) - readable (can understand) 0.95
## Not showcasing the data - Showcasing data -0.90
## Not tell story with data - Tell story with data (call for action) 0.87 0.35
## will not share - Share 0.65 0.75
## will not comment - Leave comment 0.47 0.82
## will not upvote - upvote 0.85 0.42
## scroll - stop and look 0.73 0.62
## Poorly communicated - Communicated 0.67
## bad design quality - good design quality 0.95
## Untrustworthy - Trustworthy -0.82 -0.55
## RC3
## Pub source (Reddit) (3 = new york post)\r\n - pub source (Jstor)
## not legible (no idea what it's writing) - legible (can read) -0.47
## not readable (no idea about message) - readable (can understand)
## Not showcasing the data - Showcasing data
## Not tell story with data - Tell story with data (call for action)
## will not share - Share
## will not comment - Leave comment
## will not upvote - upvote
## scroll - stop and look
## Poorly communicated - Communicated 0.68
## bad design quality - good design quality
## Untrustworthy - Trustworthy
##
## RC1 RC2 RC3
## SS loadings 6.39 4.00 0.95
## Proportion Var 0.53 0.33 0.08
## Cumulative Var 0.53 0.87 0.94
## [1] "DESIGNER P12"
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1 RC2
## audience low stats skills - audience high stats skills 0.97
## illustrative focus - data chart type focus -0.95
## low design quality - high design quality 0.54
## misleading - unbiased 0.83
## not much thought for chart type - thought put into chart type 0.95
## not much thought for storytelling - thought put into storytelling -0.72 -0.33
## author low stats skills - author high stats skills\r\n 0.90 0.34
## scroll on - pause 0.46
## color=decorative/nonexistent - color=visual encoding 0.70
## RC3
## audience low stats skills - audience high stats skills
## illustrative focus - data chart type focus
## low design quality - high design quality 0.78
## misleading - unbiased -0.47
## not much thought for chart type - thought put into chart type
## not much thought for storytelling - thought put into storytelling 0.52
## author low stats skills - author high stats skills\r\n
## scroll on - pause 0.87
## color=decorative/nonexistent - color=visual encoding
##
## RC1 RC2 RC3
## SS loadings 3.57 2.61 1.93
## Proportion Var 0.40 0.29 0.21
## Cumulative Var 0.40 0.69 0.90
## [1] "DESIGNER P13"
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1
## less time spent designing (labor) - more time spent designing (labor) 0.39
## less thought put into designing - more thought put into designing 0.82
## less successful at conveying message - more successful at conveying message
## explanatory - call for action/behavior change 0.78
## pure information - author has intention (biased) 0.92
## scroll on - stop and look 0.67
## RC3
## less time spent designing (labor) - more time spent designing (labor) 0.89
## less thought put into designing - more thought put into designing 0.50
## less successful at conveying message - more successful at conveying message
## explanatory - call for action/behavior change 0.48
## pure information - author has intention (biased)
## scroll on - stop and look 0.73
## RC2
## less time spent designing (labor) - more time spent designing (labor)
## less thought put into designing - more thought put into designing
## less successful at conveying message - more successful at conveying message 0.98
## explanatory - call for action/behavior change 0.33
## pure information - author has intention (biased)
## scroll on - stop and look
##
## RC1 RC3 RC2
## SS loadings 2.75 1.90 1.20
## Proportion Var 0.46 0.32 0.20
## Cumulative Var 0.46 0.77 0.97
## [1] "DESIGNER P14"
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 3
## Type of rotation: varimax
##
## Loadings:
## RC1 RC3 RC2
## low visual comm skills - high visual comm skills 0.78 0.46 0.40
## specific audience - general audience 0.84 0.38 0.36
## hard to understand - easy to understand 0.93
## scroll past - stop and look 0.32 0.90
## informative - narrative 0.62 0.75
##
## RC1 RC3 RC2
## SS loadings 1.89 1.76 1.25
## Proportion Var 0.38 0.35 0.25
## Cumulative Var 0.38 0.73 0.98
## PRINT CONSTRUCT CLUSTER DENDROGRAM for the researcher group
type <- "cluster_dendrograms/"
topic <- "construct_"
level <- "group/"
# Ward clustering of the researchers' pooled constructs; drawn once per
# device (file, then screen) via a helper instead of a copy-pasted call.
draw_researcher_constructs <- function() {
  cluster(g_researchers, along = 1, # 1 = constructs, 2 = elements, 0 = both (default)
          trim = 200,
          dmethod = "euclidean", # distance measure TODO evaluate diff options
          cmethod = "ward.D",    # agglomeration method TODO evaluate diff options
          align = TRUE, # align before clustering: reverse constructs if needed for maximal similarity
          cex = 1, lab.cex = 1, main = "RESEARCHERS")
}
##### PRINT PLOTS TO FILE
png(filename = paste0("plots/", level, type, topic, "RESEARCHERS_dendrogram", ".png"),
    width = 15, height = 12, units = "in", res = 300)
draw_researcher_constructs()
dev.off()
## quartz_off_screen
## 2
##### PRINT PLOTS TO SCREEN
draw_researcher_constructs()
## PRINT CONSTRUCT CLUSTER DENDROGRAM for the designer group
type <- "cluster_dendrograms/"
topic <- "construct_"
level <- "group/"
# Ward clustering of the designers' pooled constructs; drawn once per
# device (file, then screen) via a helper instead of a copy-pasted call.
draw_designer_constructs <- function() {
  cluster(g_designers, along = 1, # 1 = constructs, 2 = elements, 0 = both (default)
          trim = 200,
          dmethod = "euclidean", # distance measure TODO evaluate diff options
          cmethod = "ward.D",    # agglomeration method TODO evaluate diff options
          align = TRUE, # align before clustering: reverse constructs if needed for maximal similarity
          cex = 1, lab.cex = 1, main = "DESIGNERS")
}
####### PRINT PLOTS TO FILE
png(filename = paste0("plots/", level, type, topic, "DESIGNERS_dendrogram", ".png"),
    width = 15, height = 12, units = "in", res = 300)
draw_designer_constructs()
dev.off()
## quartz_off_screen
## 2
####### PRINT PLOTS TO SCREEN
draw_designer_constructs()
## PRINT ELEMENT CLUSTER DENDROGRAM for the researcher group
type <- "cluster_dendrograms/"
topic <- "elements_"
level <- "group/"
# Ward clustering of elements for the researcher group; drawn once per
# device (file, then screen) via a helper instead of a copy-pasted call.
draw_researcher_elements <- function() {
  cluster(g_researchers, along = 2, # 1 = constructs, 2 = elements, 0 = both (default)
          trim = 200,
          dmethod = "euclidean", # distance measure TODO evaluate diff options
          cmethod = "ward.D",    # agglomeration method TODO evaluate diff options
          align = TRUE, # align before clustering: reverse constructs if needed for maximal similarity
          cex = 1, lab.cex = 1, main = "RESEARCHERS")
}
##### PRINT PLOTS TO FILE
png(filename = paste0("plots/", level, type, topic, "RESEARCHERS_dendrogram", ".png"),
    width = 10, height = 4, units = "in", res = 300)
draw_researcher_elements()
dev.off()
## quartz_off_screen
## 2
##### PRINT PLOTS TO SCREEN
# outer parentheses force auto-printing of the returned repgrid object
(researcher_elements <- draw_researcher_elements())
##
## META DATA:
## Number of constructs: 37
## Number of elements: 10
##
## SCALE INFO:
## The grid is rated on a scale from 1 (left pole) to 5 (right pole)
##
## RATINGS:
## CARTOMAP - 5 6 - PENGUIN_DISTS
## IXN_SLOPE - 4 | | 7 - CAREBEAR_BARS
## IXN_EBARS - 3 | | | | 8 - TAXMAN_INFO
## HISTO_DIST - 2 | | | | | | 9 - LADY_LINE
## BAYES_RIDGES - 1 | | | | | | | | 10 - BULLET_BARS
## | | | | | | | | | |
## in in the game (1) 2.5 5 3.5 3.5 1 1 5 5 1.5 4 (1) no skin in t
## ot interesting (2) 4 4 4 4 1 1 5 3 3 1 (2) topic intere
## stop and look (3) 5 5 5 5 1 2.5 4.5 5 2 4 (3) ignore
## pro designers (4) 4.5 4.5 4.5 4.5 1 3.5 4 3 1.5 3 (4) not pro desi
## look (in sec) (5) 3 5 5 5 2 3 3 4 3 2 (5) time look (i
## es interesting (6) 5 5 5 5 1 2 2 4 1 1 (6) vis des not
## igh des skills (7) 4 5 4 5 1 1 3 4 1 1 (7) author low d
## g time to make (8) 3 5 4 5 1.5 1.5 5 1 1 3 (8) quick to mak
## design skills (9) 2 5 3 4 3 2 4 2 1 3 (9) author less
## team author (10) 5 5 5 5 2 4 5 1 1 2 (10) sole author
## e conveying m (11) 4 2 3 4 2 3 4 3 5 3 (11) author effe
## dience ignore (12) 5 3 5 4 3 4 5 3 5 4 (12) target audi
## scroll (13) 4 5 5 5 2 2 5 4 5 5 (13) stop
## or competancy (14) 4.5 4 5 4 5 5 5 2 5 5 (14) high author
## 't trust data (15) 5 5 3 3 5 5 5 3 5 5 (15) trust data
## would change (16) 3 3 4 5 5 5 4 3 5 5 (16) would not
## about design (17) 4 5 4 4 1 3 1 3 1 1 (17) does not ca
## sensational (18) 4 5 5 5 3 3 1 2 1 1 (18) dry
## author tried (19) 3 5 4 4 1 2 2 1 1 1 (19) author did
## stand-alone (20) 5 5 5 2 3 2 1 1 1.5 1 (20) context-dep
## clever (21) 2.5 5 4 5 4.5 4 1.5 2 2 1.5 (21) not clever
## ove (publish) (22) 3 4 4 4 4 2 2 1.5 2 2 (22) hate (rip u
## biased (23) 5 5 5 5 4 5 1 4 2 2 (23) not biased
## nce edu level (24) 5 5 4 4 4 4 1 3 1 1 (24) high edu le
## story-first (25) 4.5 4.5 4.5 4.5 2 4.5 1 5 1 1.5 (25) data-first
## stats skills (26) 5 3 4 4 3 5 1 2 1 1 (26) author more
## viz training (27) 5 4 4 4 3 5 1 3 1 2 (27) viewer data
## cipant ignore (28) 5 2 4 4 4 5 3 3 2 2 (28) participant
## persuasive (29) 5 5 5 5 4 5 1 5 3 4 (29) informative
## ts background (30) 5 4 4.5 4.5 3 5 1 4 3 4 (30) stats phd
## 't trust data (31) 5 5 5 5 5 5 1 5 5 5 (31) trust data
## ated audience (32) 5 5 4 4 4.5 4 1 3 3.5 3 (32) more educat
## eral audience (33) 5 5 5 4 4 5 1 3 4 3 (33) niche audie
## trust author (34) 5 5 5 5 5 5 1 3 5 3 (34) trust autho
## re about data (35) 5 5 5 5 4 5 1 1 2 4 (35) care about
## informal (36) 5 5 5 5 4 5 1 2 3 4 (36) formal
## hor's message (37) 5 5 5 5 2 5 1 2 1 5 (37) believe aut
##
##
##
##
##
##
##
## h
## s
##
## g
## n
## i
## e
## e
##
##
## c
## e
##
##
##
## c
## r
##
## n
## e
##
## p
##
## v
##
##
##
##
##
##
##
## e
## n
## r
## d
##
## h
## PRINT ELEMENT CLUSTER DENDROGRAM for the designer group
type = "cluster_dendrograms/"
topic = "elements_"
level = "group/"
##### PRINT PLOTS TO FILE
plot.new()
png(filename=paste0("plots/",level,type,topic,"DESIGNERS_dendrogram",".png"),width=10, height = 4, units = "in", res = 300)
cluster(g_designers, along = 2, #1=constructs, 2 = elements, 0 = both (default)
trim=200,
dmethod = "euclidean",#distance measure TODO evaluate diff options
cmethod="ward.D", #agglomeration method TODO evaluate diff options
align = TRUE, #align b4 clustering? reverses constructs if necessary to yield maximal simmilarity
cex = 1, lab.cex = 1, main = "DESIGNERS")
dev.off()
## quartz_off_screen
## 2
##### PRINT PLOTS TO SCREEN
# Same element clustering as the file version, drawn on the active device
# and kept for later use.
designer_elements <- cluster(
  g_designers,
  along = 2,             # 1 = constructs, 2 = elements, 0 = both (default)
  trim = 200,
  dmethod = "euclidean", # distance measure TODO evaluate diff options
  cmethod = "ward.D",    # agglomeration method TODO evaluate diff options
  align = TRUE,          # reverse constructs if needed for maximal similarity
  cex = 1, lab.cex = 1, main = "DESIGNERS"
)
print(designer_elements)
##
## META DATA:
## Number of constructs: 56
## Number of elements: 10
##
## SCALE INFO:
## The grid is rated on a scale from 1 (left pole) to 5 (right pole)
##
## RATINGS:
## TAXMAN_INFO - 5 6 - CARTOMAP
## HISTO_DIST - 4 | | 7 - PENGUIN_DISTS
## IXN_SLOPE - 3 | | | | 8 - LADY_LINE
## IXN_EBARS - 2 | | | | | | 9 - CAREBEAR_BARS
## BAYES_RIDGES - 1 | | | | | | | | 10 - BULLET_BARS
## | | | | | | | | | |
## isual encoding (1) 1 1 4 5 5 1 1 5 5 4 (1) color=decorat
## likely to save (2) 5 5 5 5 5 1 1 4 4 3 (2) less likely t
## ata-viz skills (3) 1 4 3 4 2 1 1 5 2 2 (3) author low da
## very nice (4) 2 5 2 5 3 1 1 5 3 3 (4) very ugly
## design quality (5) 3 4 4 4 1 2 2 4 4 3 (5) low design qu
## likely to read (6) 5 5 3 4 1 1 3 5 2 4 (6) less likely t
## to understand (7) 4 4 2 3 2 1 4 5 1 1 (7) hard to under
## pause (8) 3 4 1 3 1 3 3 5 5 3 (8) scroll on
## accessibility (9) 4 5 1 1 2 3 3 4 3 2 (9) low visual ac
## to understand (10) 1 4 3 3 4 1 2 3 1 1 (10) hard to unde
## storytelling (11) 2 4 1 4 1 1 2 1 1 1 (11) not much tho
## t conveying m (12) 4 3 1 1 2 2 3 1 1 1 (12) less success
## to understand (13) 5 5 2 3 4 1 2 1 1 1 (13) hard to unde
## Communicated (14) 3 3 3 3 2 2 1.5 2.5 1.2 1.5 (14) Poorly commu
## would look at (15) 3 3 4 4 1 1 2 2 3 1 (15) would not lo
## eral audience (16) 3 4 4 4 3 1 2 2 2 2 (16) specific aud
## l comm skills (17) 3 4 4 4 3 1 2 2 1 2 (17) low visual c
## esign quality (18) 3.2 4 3.5 4 2.8 2.2 2 1.7 2 2.5 (18) bad design q
## comm (des) sk (19) 4 3.5 3.5 5 3 1 2.5 2 3 2 (19) author less
## to chart type (20) 1 2 5 2 1 1 1 3 4 3 (20) not much tho
## unbiased (21) 1 1 5 1 3 2 1 2 3 4 (21) misleading
## informative (22) 1 5 5 1 1 4 2 2 3 1 (22) call-to-acti
## scroll past (23) 1 1 1 1 1 4 1 5 5 5 (23) stop and loo
## ntextual info (24) 1 2 2 1 1 3 1 3 2 5 (24) would read c
## trustworthy (25) 1 2 2 2 2 1 2 5 3 4 (25) not trustwor
## alysis skills (26) 1 1 2 3 3 2 1 5 4 4 (26) author low d
## good data rep (27) 1 3 3 3 2 1 1 5 2 5 (27) bad data rep
## owcasing data (28) 1 2 1 1 2 1 1 2 4 2 (28) Not showcasi
## analysis skil (29) 1 2 2 1 2 2 1 3 4 2 (29) author less
## Trustworthy (30) 1 1 1 1 1.8 1.7 2 3 3 2 (30) Untrustworth
## l not comment (31) 1 1 1.5 1 1 1 1 3 3 2 (31) Leave commen
## le (can read) (32) 1 1 1 1 1 1 1 1 3 2 (32) not legible
## e/nonexistent (33) 5 5 2 1 1 5 5 1 1 2 (33) color=visual
## ikely to save (34) 1 1 1 1 1 5 5 2 2 3 (34) more likely
## ta-viz skills (35) 5 2 3 2 4 5 5 1 4 4 (35) high data-vi
## very ugly (36) 4 1 4 1 3 5 5 1 3 3 (36) very nice
## esign quality (37) 3 2 2 2 5 4 4 2 2 3 (37) high design
## ad contextual (38) 1 1 3 2 5 5 3 1 4 2 (38) more likely
## to understand (39) 2 2 4 3 4 5 2 1 5 5 (39) easy to unde
## scroll on (40) 3 2 5 3 5 3 3 1 1 3 (40) pause
## accessibility (41) 2 1 5 5 4 3 3 2 3 4 (41) high visual
## to understand (42) 5 2 3 3 2 5 4 3 5 5 (42) easy to unde
## for storytell (43) 4 2 5 2 5 5 4 5 5 5 (43) thought put
## to understand (44) 1 1 4 3 2 5 4 5 5 5 (44) easy to unde
## t conveying m (45) 2 3 5 5 4 4 3 5 5 5 (45) more success
## communicated (46) 3 3 3 3 4 4 4.5 3.5 4.8 4.5 (46) Communicated
## d not look at (47) 3 3 2 2 5 5 4 4 3 5 (47) would look a
## ific audience (48) 3 2 2 2 3 5 4 4 4 4 (48) general audi
## l comm skills (49) 3 2 2 2 3 5 4 4 5 4 (49) high visual
## esign quality (50) 2.8 2 2.5 2 3.2 3.8 4 4.3 4 3.5 (50) good design
## comm (des) sk (51) 2 2.5 2.5 1 3 5 3.5 4 3 4 (51) author more
## for chart typ (52) 5 4 1 4 5 5 5 3 2 3 (52) thought put
## misleading (53) 5 5 1 5 3 4 5 4 3 2 (53) unbiased
## all-to-action (54) 5 1 1 5 5 2 4 4 3 5 (54) informative
## stop and look (55) 5 5 5 5 5 2 5 1 1 1 (55) scroll past
## ntextual info (56) 5 4 4 5 5 3 5 3 4 1 (56) would not re
## MANUAL DENDROGRAM FOR ELEMENTS — DESIGNERS
# Ratings matrix transposed so rows = elements (columns = constructs).
ratings_design <- t(getRatingLayer(g_designers))
# Euclidean distances between elements, clustered with Ward's method.
fit_design <- hclust(dist(ratings_design, method = "euclidean"), method = "ward.D")
# Dendrogram object used below for the back-to-back comparison.
dend.el_design <- as.dendrogram(fit_design)
# plot(dend.el_design, horiz=TRUE)
## MANUAL DENDROGRAM FOR ELEMENTS — RESEARCHERS
# Ratings matrix transposed so rows = elements (columns = constructs).
ratings_research <- t(getRatingLayer(g_researchers))
# Euclidean element distances, Ward agglomeration (mirrors the designer version).
fit_research <- hclust(dist(ratings_research, method = "euclidean"), method = "ward.D")
dend.el_research <- as.dendrogram(fit_research)
# plot(dend.el_research, horiz=TRUE)
## the tanglegram / back to back dendrograms
# l <- dendlist(dend.el_design, dend.el_research)
# tanglegram(l)
# NOTE(fix): dropped the plot.new() that preceded png() — it opened an extra
# on-screen device that the dev.off() below never closed (see the note about
# having to run dev.off() several times).
png(filename = "plots/group/COMPARE_ELEMENTS.png",
    width = 12, height = 6, units = "in", res = 300)
# Back-to-back dendrograms of the two groups' element clusterings.
dendbackback(dend.el_design, dend.el_research,
             sort = TRUE,
             margin_inner = 8,
             main_left = "DESIGNERS",
             main_right = "RESEARCHERS",
             lab.cex = 1,
             highlight_distinct_edges = TRUE,
             common_subtrees_color_branches = TRUE,
             common_subtrees_color_lines = TRUE,
             main = "ELEMENTS by GROUP")
dev.off()
## quartz_off_screen
## 2
## to fix error in dev.off error , MAY NEED to run several times
# while (!is.null(dev.list())) dev.off()
## PRINT TO SCREEN
# Same back-to-back element comparison, drawn on the active device.
bb_opts <- list(
  sort = TRUE,
  margin_inner = 8,
  main_left = "DESIGNERS",
  main_right = "RESEARCHERS",
  lab.cex = 1,
  highlight_distinct_edges = TRUE,
  common_subtrees_color_branches = TRUE,
  common_subtrees_color_lines = TRUE,
  main = "ELEMENTS by GROUP"
)
do.call(dendbackback, c(list(dend.el_design, dend.el_research), bb_opts))
type <- "pca_biplots/"
topic <- ""
level <- "group/"
# NOTE(fix): removed the plot.new() that preceded png() — it opened a stray
# on-screen device that the matching dev.off() never closed.
png(filename = paste0("plots/", level, type, topic, "RESEARCHERS_biplot", ".png"))
# ,width=10, height = 4, units = "in", res = 300)
## RESEARCHER BIPLOT (to file)
title <- "RESEARCHERS"
biplot2d(g_researchers,
         dim = c(2, 1),
         zoom = 1,
         ## constructs
         c.lines = TRUE,
         col.c.lines = gray(0.9),
         c.label.cex = 0.5,
         c.labels.inside = FALSE,
         c.label.col = "blue",
         ## elements
         # rect.margins = c(2,2),
         e.point.col = "red",
         e.label.col = "red",
         e.label.cex = 0.5, # element label size
         ## size and margins
         mai = c(0.2, 1.5, .2, 1.5),
         unity = TRUE, # just makes it neater
         scale.e = 0.75)
op <- par(cex.main = 0.75, # title size
          new = TRUE)      # overlay the title on the existing biplot
title(title)
dev.off()
## quartz_off_screen
## 2
## RESEARCHER BIPLOT (on screen)
title <- "RESEARCHERS"
biplot2d(
  g_researchers,
  dim = c(2, 1),
  zoom = 1,
  ## construct styling
  c.lines = TRUE,
  col.c.lines = gray(0.9),
  c.label.cex = 0.5,
  c.labels.inside = FALSE,
  c.label.col = "blue",
  ## element styling
  e.point.col = "red",
  e.label.col = "red",
  e.label.cex = 0.5, # element label size
  ## size and margins
  mai = c(0.2, 1.5, .2, 1.5),
  unity = TRUE,   # just makes it neater
  scale.e = 0.75
)
# Small title, overlaid on the biplot that is already on the device.
op <- par(cex.main = 0.75, new = TRUE)
title(title)
type <- "pca_biplots/"
topic <- ""
level <- "group/"
# NOTE(fix): removed the plot.new() that preceded png() — it opened a stray
# on-screen device that the matching dev.off() never closed.
png(filename = paste0("plots/", level, type, topic, "DESIGNERS_biplot", ".png"))
# ,width=10, height = 4, units = "in", res = 300)
## DESIGNERS BIPLOT (to file)
title <- "DESIGNERS"
biplot2d(g_designers,
         dim = c(2, 1),
         zoom = 1,
         ## constructs
         c.lines = TRUE,
         col.c.lines = gray(0.9),
         c.label.cex = 0.5,
         c.labels.inside = FALSE,
         c.label.col = "blue",
         ## elements
         # rect.margins = c(2,2),
         e.point.col = "red",
         e.label.col = "red",
         e.label.cex = 0.5, # element label size
         ## size and margins
         mai = c(0.2, 1.5, .2, 1.5),
         unity = TRUE, # just makes it neater
         scale.e = 0.75)
op <- par(cex.main = 0.75, # title size
          new = TRUE)      # overlay the title on the existing biplot
title(title)
dev.off()
## quartz_off_screen
## 2
## DESIGNERS BIPLOT (on screen)
title <- "DESIGNERS"
# BUG FIX: this block previously plotted g_researchers under the "DESIGNERS"
# title (copy-paste error). It now plots g_designers, matching the title and
# the file version of this biplot.
biplot2d(g_designers,
         dim = c(2, 1),
         zoom = 1,
         ## constructs
         c.lines = TRUE,
         col.c.lines = gray(0.9),
         c.label.cex = 0.5,
         c.labels.inside = FALSE,
         c.label.col = "blue",
         ## elements
         # rect.margins = c(2,2),
         e.point.col = "red",
         e.label.col = "red",
         e.label.cex = 0.5, # element label size
         ## size and margins
         mai = c(0.2, 1.5, .2, 1.5),
         unity = TRUE, # just makes it neater
         scale.e = 0.75)
op <- par(cex.main = 0.75, # title size
          new = TRUE)      # overlay the title on the existing biplot
title(title)
###### OPEN REP GRID APPROACH
# PCA of P15's constructs: two varimax-rotated components from Pearson
# correlations; construct labels trimmed to 50 characters.
constructPca(p15,
             nfactors = 2,
             trim = 50,
             rotate = "varimax",
             method = "pearson")
##
## #################
## PCA of constructs
## #################
##
## Number of components extracted: 2
## Type of rotation: varimax
##
## Loadings:
## RC1 RC2
## context-dependent - stand-alone 0.59 -0.58
## quick to make - long time to make 0.90 0.18
## sole author - team author 0.87 -0.15
## author no stats backgroun - stats phd 0.08 0.92
## story-first - data-first -0.17 0.86
## hate (rip up) - love (publish) 0.76 -0.40
## not clever - clever 0.55 -0.61
##
## RC1 RC2
## SS loadings 2.84 2.51
## Proportion Var 0.41 0.36
## Cumulative Var 0.41 0.76
###### VERSION psych::principal()
## constructPca() is equivalent to this
corr <- constructCor(p15) # construct intercorrelation matrix
p <- principal(corr, nfactors = 2, rotate = "varimax", cor = "cor")
print(p)
## Principal Components Analysis
## Call: principal(r = corr, nfactors = 2, rotate = "varimax", cor = "cor")
## Standardized loadings (pattern matrix) based upon correlation matrix
## RC1 RC2 h2 u2 com
## context-de - stand-alon 0.59 -0.58 0.69 0.31 2.0
## quick to m - long time 0.90 0.18 0.84 0.16 1.1
## sole autho - team autho 0.87 -0.15 0.78 0.22 1.1
## author no - stats phd 0.08 0.92 0.85 0.15 1.0
## story-firs - data-first -0.17 0.86 0.77 0.23 1.1
## hate (rip - love (publ 0.76 -0.40 0.75 0.25 1.5
## not clever - clever 0.55 -0.61 0.68 0.32 2.0
##
## RC1 RC2
## SS loadings 2.84 2.51
## Proportion Var 0.41 0.36
## Cumulative Var 0.41 0.76
## Proportion Explained 0.53 0.47
## Cumulative Proportion 0.53 1.00
##
## Mean item complexity = 1.4
## Test of the hypothesis that 2 components are sufficient.
##
## The root mean square of the residuals (RMSR) is 0.11
##
## Fit based upon off diagonal values = 0.95
# Explicit print() shows psych's full summary (loadings, h2/u2, fit indices).
print(p)## PREFERRED OUTPUT!
## Principal Components Analysis
## Call: principal(r = corr, nfactors = 2, rotate = "varimax", cor = "cor")
## Standardized loadings (pattern matrix) based upon correlation matrix
## RC1 RC2 h2 u2 com
## context-de - stand-alon 0.59 -0.58 0.69 0.31 2.0
## quick to m - long time 0.90 0.18 0.84 0.16 1.1
## sole autho - team autho 0.87 -0.15 0.78 0.22 1.1
## author no - stats phd 0.08 0.92 0.85 0.15 1.0
## story-firs - data-first -0.17 0.86 0.77 0.23 1.1
## hate (rip - love (publ 0.76 -0.40 0.75 0.25 1.5
## not clever - clever 0.55 -0.61 0.68 0.32 2.0
##
## RC1 RC2
## SS loadings 2.84 2.51
## Proportion Var 0.41 0.36
## Cumulative Var 0.41 0.76
## Proportion Explained 0.53 0.47
## Cumulative Proportion 0.53 1.00
##
## Mean item complexity = 1.4
## Test of the hypothesis that 2 components are sufficient.
##
## The root mean square of the residuals (RMSR) is 0.11
##
## Fit based upon off diagonal values = 0.95
############ ?? not really sure if this is element or construct?
###### VERSION base::prcomp()
### 1. CREATE TRANSPOSED DF FOR PCA ON CONSTRUCTS
df <- df_coded %>%
  filter(PID == "P15") %>%
  select(CAREBEAR_BARS:BAYES_RIDGES, POLES)
poles <- df$POLES # save construct names (become column names after transpose)
# Transpose so rows = elements, columns = constructs.
# NOTE(fix): pass .name_repair explicitly — omitting it for a matrix whose
# column names need repair triggers the tibble deprecation warning shown in
# the transcript below; names are overwritten with `poles` right after anyway.
df <- t(df) %>% as_tibble(.name_repair = "minimal")
## Warning: The `x` argument of `as_tibble.matrix()` must have unique column names if
## `.name_repair` is omitted as of tibble 2.0.0.
## ℹ Using compatibility `.name_repair`.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
colnames(df) <- poles # label columns with the construct pole names
# Keep only the stimulus rows (drops the trailing POLES row from the transpose).
# NOTE(fix): seq_along() instead of 1:length() (safe on empty input).
df <- df[seq_along(stimuli), ] # %>% slice(1:(n() - 1))
# NOTE(fix): across() replaces the superseded mutate_all().
df <- df %>%
  mutate(across(everything(), as.numeric)) %>% # ratings arrive as character after t()
  mutate(element = factor(stimuli))            # element labels for plotting
dpca <- df %>% select(where(is.numeric)) # numeric columns only for PCA
### 2. RUN PCA ON DF
pca <- prcomp(dpca, scale = TRUE)
# Standard deviation and proportion of variance for each principal component.
summary(pca)
## Importance of components:
## PC1 PC2 PC3 PC4 PC5 PC6 PC7
## Standard deviation 1.9293 1.2776 0.9003 0.67565 0.44771 0.37758 0.18806
## Proportion of Variance 0.5318 0.2332 0.1158 0.06521 0.02864 0.02037 0.00505
## Cumulative Proportion 0.5318 0.7649 0.8807 0.94595 0.97458 0.99495 1.00000
# Data plot: elements positioned in PC space, colored and labeled by element.
autoplot(pca, data = df, color = "element", label = TRUE, label.size = 5) +
  theme_minimal()
# Biplot of scores and loadings.
biplot(pca)
# Scree plot of component variances.
plot(pca, type = "lines")
TODO: more work is needed here to explore the clustering methods available; see https://www.datanovia.com/en/blog/cluster-analysis-in-r-simplified-and-enhanced/ and https://www.sthda.com/english/wiki/wiki.php?id_contents=7851#visualize-supplementary-quantitative-variables
## TABLE AT FIRST
# Percent of all constructs in each top-level code (FIRST) x group (SAMPLE) cell.
crosstab(data = df_codedElements, rowvar = FIRST, colvar = SAMPLE, type = "percent") # or "prop.col",
## SAMPLE
## FIRST DESIGNER RESEARCHER Total
## artifact 34.41% 15.05% 49.46%
## audience 5.38% 5.38% 10.75%
## data 1.08% 3.23% 4.30%
## maker 18.28% 16.13% 34.41%
## mode 1.08% 0.00% 1.08%
## Total 60.22% 39.78% 100.00%
## DATAFRAME
# Working subset of the coded-elements data: code hierarchy, poles, ids.
df <- df_codedElements %>%
  select(FIRST, SECOND, THIRD, POLES, CODE_FULL, ELEMENT, SAMPLE, PID)
## TWO LEVEL TABLE
# one row per participant element X code
table_df <- df %>%
  count(FIRST, SECOND, SAMPLE) %>%
  pivot_wider(names_from = SAMPLE, values_from = n, values_fill = 0) %>%
  arrange(FIRST, SECOND) %>%
  group_by(FIRST) %>%
  mutate(
    FIRST = if_else(row_number() == 1, FIRST, ""), # show group label once
    # divided by 10 — presumably averaging over the 10 elements; TODO confirm
    DESIGNER = DESIGNER / 10,
    RESEARCHER = RESEARCHER / 10
  ) %>%
  ungroup()
table_df %>% gt()
| FIRST | SECOND | DESIGNER | RESEARCHER |
|---|---|---|---|
| artifact | behavior | 14 | 5 |
| design | 4 | 5 | |
| genre | 0 | 1 | |
| intent | 5 | 1 | |
| register | 0 | 1 | |
| trust | 3 | 1 | |
| usability | 6 | 0 | |
| audience | behavior | 0 | 1 |
| skill | 2 | 3 | |
| type | 3 | 1 | |
| data | source | 1 | 0 |
| topic | 0 | 1 | |
| trust | 0 | 2 | |
| maker | dilligence | 4 | 4 |
| intent | 2 | 1 | |
| skill | 11 | 7 | |
| trust | 0 | 2 | |
| type | 0 | 1 | |
| mode | type | 1 | 0 |
## THREE LEVEL TABLE
# One row per (FIRST, SECOND, THIRD) code combination, counts split by group.
table_df <- df %>%
  count(FIRST, SECOND, THIRD, SAMPLE) %>%
  pivot_wider(names_from = SAMPLE, values_from = n, values_fill = 0) %>%
  arrange(FIRST, SECOND, THIRD) %>%
  group_by(FIRST, SECOND) %>%
  mutate(
    THIRD = as.character(THIRD),
    SECOND = if_else(row_number() == 1, SECOND, ""), # show labels once per group
    FIRST = if_else(row_number() == 1, FIRST, ""),
    # divided by 10 — presumably averaging over the 10 elements; TODO confirm
    DESIGNER = DESIGNER / 10,
    RESEARCHER = RESEARCHER / 10
  ) %>%
  ungroup()
table_df %>% gt()
| FIRST | SECOND | THIRD | DESIGNER | RESEARCHER |
|---|---|---|---|---|
| artifact | behavior | commenting | 1 | 0 |
| encounter | 7 | 4 | ||
| infoseek | 2 | 0 | ||
| like/upvote | 2 | 0 | ||
| redesign | 0 | 1 | ||
| save | 1 | 0 | ||
| sharing | 1 | 0 | ||
| artifact | design | appeal | 1 | 0 |
| appropriateness | 1 | 0 | ||
| clever | 0 | 1 | ||
| duration | 0 | 1 | ||
| encoding | 1 | 0 | ||
| like | 0 | 1 | ||
| quality | 1 | 0 | ||
| salience | 0 | 1 | ||
| vibe | 0 | 1 | ||
| artifact | genre | completeness | 0 | 1 |
| artifact | intent | data | 1 | 0 |
| narrative | 1 | 0 | ||
| NA | 3 | 1 | ||
| artifact | register | NA | 0 | 1 |
| artifact | trust | NA | 3 | 1 |
| artifact | usability | accessibility | 1 | 0 |
| clarity | 1 | 0 | ||
| effort | 2 | 0 | ||
| legibility | 1 | 0 | ||
| readability | 1 | 0 | ||
| audience | behavior | encounter | 0 | 1 |
| audience | skill | dataliteracy | 1 | 1 |
| statistics | 1 | 0 | ||
| NA | 0 | 2 | ||
| audience | type | NA | 3 | 1 |
| data | source | NA | 1 | 0 |
| data | topic | interest | 0 | 1 |
| data | trust | NA | 0 | 2 |
| maker | dilligence | data | 0 | 1 |
| design | 2 | 1 | ||
| storytelling | 1 | 0 | ||
| NA | 1 | 2 | ||
| maker | intent | NA | 2 | 1 |
| maker | skill | communication | 4 | 1 |
| data | 2 | 0 | ||
| design | 3 | 3 | ||
| statistics | 1 | 2 | ||
| visualization | 1 | 0 | ||
| NA | 0 | 1 | ||
| maker | trust | NA | 0 | 2 |
| maker | type | group size | 0 | 1 |
| mode | type | NA | 1 | 0 |
## CONSTRUCT LEVEL TABLE
# Collapse raw construct poles into one comma-separated cell per code x group.
table_df <- df %>%
  group_by(FIRST, SECOND, THIRD, SAMPLE) %>%
  summarise(POLES_values = paste(unique(POLES), collapse = ","), .groups = "drop") %>%
  pivot_wider(names_from = SAMPLE, values_from = POLES_values, values_fill = "") %>%
  arrange(FIRST, SECOND, THIRD) %>%
  group_by(FIRST, SECOND) %>%
  mutate(
    THIRD = as.character(THIRD),
    SECOND = if_else(row_number() == 1, SECOND, ""), # show labels once per group
    FIRST = if_else(row_number() == 1, FIRST, "")
  ) %>%
  ungroup()
table_df %>% gt()
| FIRST | SECOND | THIRD | DESIGNER | RESEARCHER |
|---|---|---|---|---|
| artifact | behavior | commenting | will not comment-Leave a comment | |
| encounter | low engagement (scroll past)-high engagement (stop and look),would not spend time looking at-would spend time looking at,scroll past-stop and look,keep scrolling-stop and look ,scroll on-pause,scroll on-stop and look | stop-scroll,ignore-stop and look,participant would not want to look-participant would want to look,how long to look at chart (in seconds)-NA | ||
| infoseek | would not read contextual information-would read more contextual information,less likely to read contextual information-more likely to read | |||
| like/upvote | less likely to “like” react-more likely to “like”,will not upvote-will upvote | |||
| redesign | would not want to change based on context-would want to change | |||
| save | less likely to save-more likely to save | |||
| sharing | will not share-Share it with someone | |||
| artifact | design | appeal | very ugly (personal preference)-very nice | |
| appropriateness | bad data representation-good data representation | |||
| clever | not clever-clever | |||
| duration | quick to make-long time to make | |||
| encoding | color as decorative or nonexistent-color as a visual encoding | |||
| like | hate (rip up)-love (publish) | |||
| quality | lower design quality-higher design quality | |||
| salience | visualization design not interesting-visualization design interesting | |||
| vibe | dry-sensational | |||
| artifact | genre | completeness | context-dependent-stand-alone | |
| artifact | intent | data | Not showcasing the data-Showcasing the data | |
| narrative | Not tell a story with data-Tell a story with data (call for action) | |||
| NA | more illustrative focus-more data chart type focus,explanatory-call for action (want to change behavior),informative-narrative | data serving story (story first)-story serving data (data first/story ad hoc) | ||
| artifact | register | NA | informal-formal | |
| artifact | trust | NA | not trustworthy-trustworthy,Untrustworthy-Trustworthy,misleading-unbiased | not biased-biased |
| artifact | usability | accessibility | low accessibility to visually impaired readers-high accessibility to visually impaired readers | |
| clarity | not clear overall (hard to understand)-clear overall (easy to understand) | |||
| effort | easier to understand-harder to understand,hard to understand-easy to understand | |||
| legibility | not legible (no idea what it’s writing)-legible (I can read it!) | |||
| readability | not readable (no idea about the message)-readable (I can understand it!) | |||
| audience | behavior | encounter | target audience would not want to look-target audience want to look at it | |
| audience | skill | dataliteracy | Audience low data literacy-Audience high data literacy | viewer no training in data visualization-viewer training in data visualization |
| statistics | audience low stats skills-audience high stats skills | |||
| NA | audience education level (low)-high edu level,less educated audience-more educated audience | |||
| audience | type | NA | Niche audience-General audience,niche audience-general audience,specific audience-general audience | general audience-niche audience |
| data | source | NA | Data collector and data designer are different people-Data collector and data designer are the same person | |
| data | topic | interest | topic not interesting-topic interesting | |
| data | trust | NA | don't trust the data-trust the data | |
| maker | dilligence | data | does not care about data-care about data | |
| design | not much thought put into chart type-thought put into chart type,less thought put into designing-more thought put into designing | does not care about design-care about design | ||
| storytelling | not much thought put into storytelling-thought put into storytelling | |||
| NA | less time spent designing (labor)-more time spent designing (labor) | no skin in the game (no thought put into making the design of the viz)-skin in the game (more thought put into making the design),author did not try-author tried | ||
| maker | intent | NA | More informative-More call-to-action, pure information-author has an intention (biased) | informative-persuasive |
| maker | skill | communication | author low communication skills-author high communication skills,Poorly communicated-Communicated,less successful at conveying message-more successful at conveying message,low visual communication skills-high visual communication skills | author not effective in conveying message-author effective in conveying message |
| data | author low data-analysis skills-high data-analysis skills,author less data analysis skills-author more data analysis skills | |||
| design | designer has less design skills-designer has more design skills,author less data communication (design) skills-author more data communication (design) skills,Design quality - bad-Design quality - Good | not professional designers-professional designers,author has less design skills-more design skills,author's design skills (low)-(high) | ||
| statistics | author low stats skills-author high stats skills | author has less statistics skills-more statistics skills,author no stats background-stats phd | ||
| visualization | author low data-viz skills-high data-viz skills | |||
| NA | low author competancy-high author competancy | |||
| maker | trust | NA | don't believe the author's message-believe author's message,don't turst the author-trust the author | |
| maker | type | group size | likelihood of made by one person (3 means unsure)-probably made by multiple people | |
| mode | type | NA | Publication source (Reddit) (3 = new york post) -publication source (Jstor) |
###### CODE LEVEL TO XLS
# Same code-level pole table, but poles joined with newlines for Excel cells.
table_df <- df %>%
  group_by(FIRST, SECOND, THIRD, SAMPLE) %>%
  summarise(POLES_values = paste(unique(POLES), collapse = "\n"), .groups = "drop") %>%
  pivot_wider(names_from = SAMPLE, values_from = POLES_values, values_fill = "") %>%
  arrange(FIRST, SECOND, THIRD) %>%
  group_by(FIRST, SECOND) %>%
  mutate(
    THIRD = as.character(THIRD),
    SECOND = if_else(row_number() == 1, SECOND, ""), # show labels once per group
    FIRST = if_else(row_number() == 1, FIRST, "")
  ) %>%
  ungroup()
# Alternative displays:
# table_df
# knitr::kable(table_df)
# library(gt)
# table_df %>% gt()
write.xlsx(table_df, file = "table.xlsx", colNames = TRUE, asTable = TRUE)
# df <- df_codedElements %>%
# select(
# value, ELEMENT,CODE,POLES,PID,SAMPLE
# )
#
#
#
# m0 <- lmer(value ~ (1|PID), data = df )
# summary(m0)
#
# levels(df$ELEMENT)
# m1 <- lmer(value ~ ELEMENT*CODE + SAMPLE + (1|PID), data = df)
# summary(m1)
# plot_model(m1,type="pred", terms=c("CODE"))
## TODO: clarify exactly what is measured as 'conflict'; see
# https://docs.openrepgrid.org/articles/web/measures-conflict.html
#Bell, R. C. (2004). A new approach to measuring inconsistency or conflict in grids. Personal Construct Theory & Practice, 1, 53–59.
#Heider, F. (1946). Attitudes and cognitive organization. Journal of Psychology, 21, 107–112.
# Conflict/inconsistency based on triangle inequalities (per the transcript
# below) for participant P4, broken down by element and by construct.
indexConflict3(p4)
##
## ##########################################################
## CONFLICT OR INCONSISTENCIES BASED ON TRIANGLE INEQUALITIES
## ##########################################################
##
## Potential conflicts in grid: 150
## Actual conflicts in grid: 66
## Overall percentage of conflict in grid: 44 %
##
## ELEMENTS
## ########
##
## Percent of conflict attributable to element:
##
## percentage
## 1 CAREBEAR_BARS 1666.67
## 2 LADY_LINE 1363.64
## 3 BULLET_BARS 1212.12
## 4 CARTOMAP 454.55
## 5 TAXMAN_INFO 151.52
## 6 PENGUIN_DISTS 606.06
## 7 HISTO_DIST 1666.67
## 8 IXN_EBARS 909.09
## 9 IXN_SLOPE 909.09
## 10 BAYES_RIDGES 1060.61
##
## Chi-square test of equal count of conflicts for elements.
##
## Chi-squared test for given probabilities
##
## data: x$e.count
## X-squared = 14.909, df = 9, p-value = 0.09346
##
##
## CONSTRUCTS
## ##########
##
## Percent of conflict attributable to construct:
##
## percentage
## 1 not biased - biased 16.67
## 2 dry - sensationa 14.39
## 3 don't trus - trust data 12.88
## 4 don't beli - believe au 17.42
## 5 low audien - high edu l 14.39
## 6 stop - scroll 24.24
##
## Chi-square test of equal count of conflicts for constructs.
##
## Chi-squared test for given probabilities
##
## data: x$c.count
## X-squared = 6.5455, df = 5, p-value = 0.2567
DECIDED to only ever consider this at the individual level, because whether the same construct is valenced — and toward which pole — can meaningfully differ between participants, which complicates or negates any group-level analysis.
# https://docs.openrepgrid.org/articles/web/measures-implicative.html
# Implicative dilemmas are closely related to the notion of conflict. An implicative dilemma arises when a desired change on one construct is associated with an undesired implication on another construct. E. g. a timid subject may want to become more socially skilled but associates being socially skilled with different negative characteristics (selfish, insensitive etc.). Hence, he may anticipate that becoming less timid will also make him more selfish (cf. Winter, 1982).
# NOTE(fix): the call previously passed a dangling empty argument (`self=,`),
# which silently fell back to the function's default. The default is now made
# explicit; element 1 as "self" is a placeholder — see TODO below.
i <- indexDilemma(p15, self = 1, ideal = 10)
## TODO really actually figure out 1. if this is useful and 2. what it is doing. 3. how to define the self (vs) ideal self and align poles
plot(i)
# # CREATE a custom grid from the coded constructs dataframe
#
# ######## FILTER MASTER DATAFRAME
# d <- df %>%
# filter(
# PID=="P15",
# CONSTRUCT_TYPE=="NUMERIC"
# ) %>%
# mutate_at(vars(CAREBEAR_BARS:BAYES_RIDGES), as.numeric) %>%
# mutate(
# COUPLED = paste0(POLE_LEFT,"-",POLE_RIGHT),
# CONSTRUCT = paste0(FIRST,"(",SECOND,")")
# ) %>% select (
# POLE_LEFT, POLE_RIGHT,
# COUPLED,
# CONSTRUCT,
# CAREBEAR_BARS:BAYES_RIDGES)
# # ) %>% column_to_rownames(var = "CONSTRUCT")
# # ) %>% column_to_rownames(var = "CODE_STANDARD")
# ###########
#
# ## elements
# e <- d %>% select(-(POLE_LEFT:CONSTRUCT)) %>% colnames()
# # e <- c("care-bear","diamond-lady","bullets","heatmap","taxes",
# # "penguins","physics-matplotlib","interaction","slope-interaction","bayesian")
# ## construct left pole
# l <- d %>% pull(POLE_LEFT)
# ## construct right pole
# r <- d %>% pull(POLE_RIGHT)
# ## construct code
# c <- d %>% pull(CONSTRUCT)
#
# ## ratings
# ## have to unravel dataframe by row; unlist goes by column,
# ## so instead, first transpose, then use
# s <- c(t(d %>% select(CAREBEAR_BARS:BAYES_RIDGES)))
#
# ## ASSEMBLE NEW REPGRID OBJECT
# ## args
# args <- list(
# name = e,
# l.name = c,
# # r.name = r,
# coupled =F,
# scores = s
# )
# t15 <- makeRepgrid(args)
# t15 <- setScale(t15, 1, 5)
# t15
#
#
# g_double <- t15+t15
# Descriptive statistics per construct for P15 (labels trimmed to 50 chars).
statsConstructs(p15,trim=50)
##
## ####################################
## Desriptive statistics for constructs
## ####################################
##
## vars n mean sd median trimmed mad
## (1) context-dependent - stand-alone 1 10 3.35 1.73 4.00 3.44 1.48
## (2) quick to make - long time to make 2 10 3.00 1.68 3.00 3.00 2.59
## (3) sole author - team author 3 10 2.50 1.78 1.50 2.38 0.74
## (4) author no stats backgroun - stats phd 4 10 3.80 1.21 4.00 4.00 1.11
## (5) story-first - data-first 5 10 3.30 1.69 4.50 3.38 0.37
## (6) hate (rip up) - love (publish) 6 10 3.15 1.06 3.50 3.12 1.11
## (7) not clever - clever 7 10 2.80 1.44 2.75 2.81 1.85
## min max range skew kurtosis se
## (1) context-dependent - stand-alone 1 5.0 4.0 -0.45 -1.72 0.55
## (2) quick to make - long time to make 1 5.0 4.0 -0.05 -1.86 0.53
## (3) sole author - team author 1 5.0 4.0 0.37 -1.87 0.56
## (4) author no stats backgroun - stats phd 1 5.0 4.0 -1.07 0.16 0.38
## (5) story-first - data-first 1 5.0 4.0 -0.39 -1.91 0.53
## (6) hate (rip up) - love (publish) 2 4.5 2.5 -0.10 -2.00 0.33
## (7) not clever - clever 1 4.5 3.5 -0.04 -1.93 0.45
## ARF TODO reformat as df to get ridgeplot of histograms w/ constructs as rows?
# calculate descriptive statistics
# Descriptive statistics per element for P15 (mean, sd, median, skew, etc.).
statsElements(p15)
##
## ##################################
## Desriptive statistics for elements
## ##################################
##
## vars n mean sd median trimmed mad min max range skew
## (1) CAREBEAR_BARS 1 7 2.50 1.89 1 2.50 0.00 1.0 5.0 4.0 0.28
## (2) LADY_LINE 2 7 3.79 1.41 4 3.79 1.48 1.0 5.0 4.0 -0.92
## (3) BULLET_BARS 3 7 3.71 1.15 4 3.71 0.74 1.5 5.0 3.5 -0.80
## (4) CARTOMAP 4 7 2.86 1.11 3 2.86 1.48 1.5 4.5 3.0 0.23
## (5) TAXMAN_INFO 5 7 4.64 0.48 5 4.64 0.00 4.0 5.0 1.0 -0.47
## (6) PENGUIN_DISTS 6 7 3.71 1.22 4 3.71 0.74 2.0 5.0 3.0 -0.55
## (7) HISTO_DIST 7 7 2.07 1.54 1 2.07 0.00 1.0 4.5 3.5 0.65
## (8) IXN_EBARS 8 7 2.43 1.48 2 2.43 1.48 1.0 4.5 3.5 0.51
## (9) IXN_SLOPE 9 7 2.57 1.69 2 2.57 1.48 1.0 4.5 3.5 0.16
## (10) BAYES_RIDGES 10 7 3.00 1.55 3 3.00 2.22 1.0 5.0 4.0 -0.17
## kurtosis se
## (1) CAREBEAR_BARS -2.11 0.72
## (2) LADY_LINE -0.64 0.53
## (3) BULLET_BARS -0.76 0.43
## (4) CARTOMAP -1.72 0.42
## (5) TAXMAN_INFO -1.86 0.18
## (6) PENGUIN_DISTS -1.66 0.46
## (7) HISTO_DIST -1.63 0.58
## (8) IXN_EBARS -1.67 0.56
## (9) IXN_SLOPE -2.13 0.64
## (10) BAYES_RIDGES -1.70 0.59
# Element intercorrelations for P15 — per the transcript below this uses
# Cohen's rc, which is invariant to scale reflection.
elementCor(p15)
##
## ############################
## Correlation between elements
## ############################
##
## Type of correlation: Cohens's rc (invariant to scale reflection)
##
## 1 2 3 4 5 6 7 8 9 10
## (1) CAREBEAR_BARS 1 0.08 0.38 -0.47 -0.32 -0.38 -0.23 -0.34 0.00 -0.34
## (2) LADY_LINE 2 0.77 0.41 0.52 -0.04 -0.94 -0.88 -0.72 -0.62
## (3) BULLET_BARS 3 -0.08 0.46 0.02 -0.74 -0.69 -0.33 -0.43
## (4) CARTOMAP 4 0.00 0.03 -0.20 -0.23 -0.21 -0.41
## (5) TAXMAN_INFO 5 0.53 -0.57 -0.45 -0.26 -0.15
## (6) PENGUIN_DISTS 6 0.14 0.27 0.41 0.43
## (7) HISTO_DIST 7 0.95 0.76 0.66
## (8) IXN_EBARS 8 0.68 0.85
## (9) IXN_SLOPE 9 0.38
## (10) BAYES_RIDGES 10